#endif
}
+/*
+ * We sometimes need to load the physical address of a kernel
+ * object. Often we can convert the virtual address to physical
+ * at execution time, but sometimes (either for performance reasons
+ * or during error recovery) we cannot do this. Patch the marked
+ * bundles to load the physical address.
+ */
+void __init
+ia64_patch_vtop (unsigned long start, unsigned long end)
+{
+ s32 *offp = (s32 *)start;
+ u64 ip;
+
+ while (offp < (s32 *)end) {
+ ip = (u64)offp + *offp;
+
+ /* replace virtual address with corresponding physical address */
+ ia64_patch_imm64(ip, ia64_tpa(get_imm64(ip)));
+ ia64_fc((void *)ip);
+ ++offp;
+ }
+ ia64_sync_i();
+ ia64_srlz_i();
+}
void __init xen_patch_kernel(void)
{
.section ".data.patch.vtop", "a" // declare section & section attributes
.previous
-#ifdef XEN
-#define LOAD_PHYSICAL(pr, reg, obj) \
-[1:](pr)movl reg = obj;; \
- shl reg = reg,4;; \
- shr.u reg = reg,4;; \
- .xdata4 ".data.patch.vtop", 1b-.
-#else
#define LOAD_PHYSICAL(pr, reg, obj) \
[1:](pr)movl reg = obj; \
.xdata4 ".data.patch.vtop", 1b-.
-#endif
/*
* For now, we always put in the McKinley E9 workaround. On CPUs that don't need it,